import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
Questionnaire context — participants were asked "How many hours do you spend on average per week to:" listen to music, create music, dance, and exercise (these answers appear as columns in the self-report demographics table loaded further below).
def build_signal_fft(sig, t_seq, name, build_phase=False, sr=128, ts=1):
    """Plot a time-domain signal with its amplitude (and optional phase) spectrum.

    Parameters:
        sig: time-domain samples.
        t_seq: time stamps (s) matching `sig`.
        name: label inserted into the subplot titles.
        build_phase: when True, draw a third subplot with the phase spectrum.
        sr: sampling rate in Hz.
        ts: nominal duration in seconds (kept for backward compatibility;
            the frequency axis is now derived from len(sig) directly).
    """
    rfft_sig = np.fft.rfft(sig)
    mag_spectrum = np.abs(rfft_sig)
    # FIX: derive the frequency bins from the actual signal length instead of
    # sr*ts, so the stem plot below cannot receive mismatched x/y lengths.
    xf = np.fft.rfftfreq(len(sig), 1 / sr)
    if build_phase:
        fig, axs = plt.subplots(3, 1, figsize=([20, 16]))
    else:
        fig, axs = plt.subplots(2, 1, figsize=([20, 13]))
    plt.locator_params(axis='x', nbins=40)
    axs[0].set_title(f'Input signal - {name}')
    axs[0].plot(t_seq, sig, color='r', label='Filtered signal')
    axs[0].set_xlabel('Time (s)')
    axs[0].set_ylabel('Amplitude (V)')
    axs[0].legend(bbox_to_anchor=[1, 1], ncol=2, title="Legend")
    axs[0].grid(True)
    axs[1].set_title(f'Amplitude spectrum - {name}')
    # FIX: label the stem artist so the legend has a handle (previously
    # emitted "No handles with labels found to put in legend" warnings).
    axs[1].stem(xf, mag_spectrum, markerfmt=None, label='Amplitude')
    axs[1].set_xlabel('Frequency (Hz)')
    axs[1].set_ylabel('Amplitude ')
    axs[1].legend(bbox_to_anchor=[1, 1], ncol=2, title="Legend")
    axs[1].grid(True)
    if build_phase:
        # FIX: the phase spectrum is a function of frequency, but it was
        # previously plotted against the *time* axis (t_seq) using the
        # full FFT; plot the rfft phase over the same frequency bins.
        phase = np.angle(rfft_sig)
        axs[2].set_title(f'Phase spectrum - {name}')
        axs[2].stem(xf, phase, label='Phase')
        axs[2].set_xlabel('Frequency (Hz)')
        axs[2].set_ylabel('Phase ')
        axs[2].legend(bbox_to_anchor=[1, 1], ncol=2, title="Legend")
        axs[2].grid(True)
# Initial data loadings
human_idx = '01'  # experiment/recording id (also keys `markers`)
filename = f'./data/movement_data/NM00{human_idx}.tsv'
markers = {'01': 12}  # number of tracked people (markers) per recording
fs = 100              # motion-capture sampling rate, Hz
rows_num = 36000      # 360 s of recording at 100 Hz
axes = ['X', 'Y', 'Z']
human = 1
df = pd.read_csv(filename, delimiter='\t', encoding='utf-8', skiprows=10, low_memory=False)
# Dict with data for 1 experiment: marker number -> per-person DataFrame
raw_data = {}
# Shared time axis — invariant, so it is built once (was recreated per marker).
time = np.linspace(0, rows_num / fs, rows_num)
# Separate different people and add time column
for m in range(1, markers[human_idx] + 1):
    human_df = df[[f'S{m} X', f'S{m} Y', f'S{m} Z']]
    human_df.columns = axes
    human_df.insert(0, 'Time (s)', time)
    raw_data[m] = human_df
# Calculate displacement (difference between two consecutive positions).
# FIX: the upper bound previously excluded the last marker
# (range(1, markers[human_idx])), leaving marker 12 without dX/dY/dZ columns.
for m in range(1, markers[human_idx] + 1):
    for position in axes:
        # Shift by one sample and subtract; the final (wrapped) row is dropped.
        displ = np.roll(raw_data[m][position], -1) - raw_data[m][position]
        displ = displ.drop(displ.index[len(displ)-1])
        raw_data[m].insert(0, f'd{position}', displ)
# Plot current data: raw position per axis for the selected person.
fig, axs = plt.subplots(3, 1, figsize=(25, 15))
for i, axis_name in enumerate(axes):
    axs[i].plot(raw_data[human]['Time (s)'], raw_data[human][axis_name], label='Sick EEG')
    # FIX: the title previously hard-coded "Human 9" even though `human` is 1.
    axs[i].set_title(f'Amplitude data for {axis_name} axis - Human {human}')
    axs[i].set_xlabel('Time (s)')
    axs[i].set_ylabel('Volume (mm)')
    axs[i].grid(True)
# Read the music sequence (song order) data; it is used below to divide
# the movement dataset into per-stimulus segments.
stimuli_df = pd.read_csv('./data/sound_data/nm15_song_order.csv')
# First and last segments always were Silence - 60s
# 1 - Middle Silence - 60s
# 2 - Meditation - 60s
# 3 - Salsa - 60s
# 4 - EDM - 60s
# Total duration of recording: 360s (6 segments of 60s each)
stim_types = {0:'Silence 1', 1: 'Middle Silence', 2: 'Meditation', 3: 'Salsa', 4:'EDM', 5:'Silence 2'}
stimuli_df.head()
| 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|
| 0 | A | 3 | 1 | 2 | 4 |
| 1 | B | 1 | 2 | 4 | 3 |
| 2 | C | 1 | 2 | 3 | 4 |
| 3 | D | 2 | 3 | 4 | 1 |
| 4 | E | 1 | 2 | 3 | 4 |
# Split each person's movement data into the six 60-second stimulus segments.
# stimuled_humans: person -> {stimulus index -> 6000-row DataFrame slice}
stimuled_humans = {}
parts_amount = 6                  # six 60 s segments per recording
ticks_per_60_second = fs * 60     # samples per segment (6000 at 100 Hz)
# For each person in stage
for person in range(1, markers[human_idx] + 1):
    # Getting ready for stimules parts
    stimules = {
        0: None,
        1: None,
        2: None,
        3: None,
        4: None,
        5: None
    }
    # 0 and 5 are Silence
    # NOTE(review): this indexes the song-order table by the recording id
    # (`human_idx`), not by `person` — every person therefore gets the same
    # order (see the repeated printout below); verify this is intended.
    stimuli_order = list(stimuli_df.iloc[int(human_idx) - 1])
    # NOTE(review): the first CSV column (a letter code) is overwritten with 5
    # and 0 is appended, yielding e.g. [5, 3, 1, 2, 4, 0]; presumably this maps
    # each stimulus type to the 60 s time slot it occupied — TODO confirm.
    stimuli_order[0] = 5
    stimuli_order.append(0)
    print(stimuli_order)
    # Fill in different parts of signals for each stimuli type / silence
    for s in range(len(stimuli_order)):
        part = raw_data[person].iloc[stimuli_order[s] * ticks_per_60_second : (stimuli_order[s]+1) * ticks_per_60_second]
        stimules[s] = part
    stimuled_humans[person] = stimules
stimuled_humans[1][1]
[5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0] [5, 3, 1, 2, 4, 0]
| dZ | dY | dX | Time (s) | X | Y | Z | |
|---|---|---|---|---|---|---|---|
| 18000 | -0.006 | -0.080 | -0.031 | 180.005000 | -1149.069 | 1089.637 | 1730.268 |
| 18001 | -0.006 | -0.074 | -0.024 | 180.015000 | -1149.100 | 1089.557 | 1730.262 |
| 18002 | -0.002 | -0.038 | -0.033 | 180.025001 | -1149.124 | 1089.483 | 1730.256 |
| 18003 | -0.014 | -0.045 | -0.053 | 180.035001 | -1149.157 | 1089.445 | 1730.254 |
| 18004 | -0.037 | -0.014 | -0.072 | 180.045001 | -1149.210 | 1089.400 | 1730.240 |
| ... | ... | ... | ... | ... | ... | ... | ... |
| 23995 | 0.013 | 0.019 | -0.110 | 239.956665 | -1150.961 | 1100.529 | 1724.918 |
| 23996 | 0.005 | 0.032 | -0.096 | 239.966666 | -1151.071 | 1100.548 | 1724.931 |
| 23997 | 0.002 | 0.015 | -0.075 | 239.976666 | -1151.167 | 1100.580 | 1724.936 |
| 23998 | -0.013 | 0.024 | -0.089 | 239.986666 | -1151.242 | 1100.595 | 1724.938 |
| 23999 | -0.013 | 0.032 | -0.088 | 239.996667 | -1151.331 | 1100.619 | 1724.925 |
6000 rows × 7 columns
# Plot the per-stimulus displacement data, colour-coded per segment,
# then an FFT + spectrogram for each segment's dX signal.
fig, axs = plt.subplots(3, 1, figsize=(25, 15))
sig_colors = ['#34495E', '#884EA0', '#3498DB', '#2ECC71', '#F4D03F', '#34495E']
human = 2
for i in range(parts_amount):
    # FIX: removed a dead `color = '#555015'` assignment that was never used
    # (every plot call passes sig_colors[i]).
    axs[0].plot(stimuled_humans[human][i]['Time (s)'], stimuled_humans[human][i]['dX'], color=sig_colors[i])
    axs[1].plot(stimuled_humans[human][i]['Time (s)'], stimuled_humans[human][i]['dY'], color=sig_colors[i])
    axs[2].plot(stimuled_humans[human][i]['Time (s)'], stimuled_humans[human][i]['dZ'], color=sig_colors[i])
for j in range(len(axes)):
    axs[j].set_title(f'Amplitude for splitted {axes[j]} axis data - Human {human}')
    axs[j].set_xlabel('Time (s)')
    axs[j].set_ylabel('Volume (mm)')
    axs[j].grid(True)
# (signal, time axis, duration s) triples consumed by the loop below.
signals = [[stimuled_humans[human][i]['dX'], stimuled_humans[human][i]['Time (s)'], 60] for i in range(parts_amount)]
for i in range(parts_amount):
    build_signal_fft(stimuled_humans[human][i]['dX'], stimuled_humans[human][i]['Time (s)'], stim_types[i], sr=100, ts=60)
    f, t, Sxx = signal.spectrogram(np.array(signals[i][0]), 100, window=('exponential', 0.2), nperseg=200)
    fig, axs = plt.subplots(1, 1, figsize=([20, 5]))
    axs.set_title(f'Spectogram signal - {stim_types[i]} X - Human {human}')
    axs.pcolormesh(t, f, Sxx, shading='gouraud')
    axs.set_xlabel('Time [sec]')
    axs.set_ylabel('Frequency [Hz]')
    axs.grid(True)
/home/botsula/.local/lib/python3.8/site-packages/numpy/core/_asarray.py:102: UserWarning: Warning: converting a masked element to nan. return array(a, dtype, copy=False, order=order) No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend.
# Create df with statistics (mean, std) for human
def create_statistics_human(stimuled_humans, human):
    """Summarise one person's displacement signals per stimulus.

    Returns a DataFrame with one row per stimulus segment and columns
    'stimul', X/Y/Z means and X/Y/Z standard deviations of dX/dY/dZ.
    Relies on the module-level `stim_types` and `parts_amount`.
    """
    parts = range(parts_amount)
    person = stimuled_humans[human]
    summary = {'stimul': [stim_types[p] for p in parts]}
    # Means first, then stds, to keep the original column order.
    for axis in ('X', 'Y', 'Z'):
        summary[f'{axis}_avg'] = [np.mean(person[p][f'd{axis}']) for p in parts]
    for axis in ('X', 'Y', 'Z'):
        summary[f'{axis}_std'] = [np.std(person[p][f'd{axis}']) for p in parts]
    return pd.DataFrame(data=summary)
# Plot dataframe made in create_statistics_human()
def plot_stats(statistics_for_human_df, human):
    """Draw a 2x6 grid of bar charts (row 0: means, row 1: stds), one column
    per stimulus, and save the figure to ./img/human_<id>.jpeg.

    Uses the module-level `sig_colors` palette, one colour per stimulus.
    """
    fig, axs = plt.subplots(2, 6, figsize=([30, 10]))
    # FIX: these y-limits are loop-invariant; they were recomputed (with
    # identical results) on every iteration, and an unused `av` was built too.
    min_average = min([statistics_for_human_df['X_avg'].min(), statistics_for_human_df['Y_avg'].min(), statistics_for_human_df['Z_avg'].min()])
    max_average = max([statistics_for_human_df['X_avg'].max(), statistics_for_human_df['Y_avg'].max(), statistics_for_human_df['Z_avg'].max()])
    # First three columns after 'stimul' are the X/Y/Z means; the bar labels
    # (X_avg, Y_avg, Z_avg) are reused for the std plots as in the original.
    bar_labels = list(statistics_for_human_df.columns.delete(0))[:3]
    for idx, row in statistics_for_human_df.iterrows():
        name = row['stimul']
        axs[0, idx].set_title(f'Mean - {name}')
        axs[1, idx].set_title(f'STD - {name}')
        axs[0, idx].bar(bar_labels, row.values[1:4], color=sig_colors[idx])
        axs[1, idx].bar(bar_labels, row.values[4:], color=sig_colors[idx])
        axs[0, idx].set_ylabel('Value')
        axs[1, idx].set_ylabel('Value')
        axs[0, idx].grid(True)
        axs[1, idx].grid(True)
        # Shared limits make the six mean plots directly comparable.
        axs[0, idx].set_ylim([min_average, max_average])
        axs[1, idx].set_ylim([-0.01, 0.1])
    fig.savefig(f'./img/human_{human}.jpeg')
# NOTE(review): range(1, 12) covers persons 1-11 only; marker 12 is skipped —
# presumably because its displacement columns were never created upstream
# (the displacement loop excluded the last marker). Verify intent.
# NOTE(review): `df` here shadows the raw mocap DataFrame loaded earlier.
for i in range(1, 12):
    df = create_statistics_human(stimuled_humans, i)
    plot_stats(df, i)
['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' -7.984664110685812e-05 -0.0013040506751124962] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' -0.0003916666666666894 0.0018356666666666873] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' 0.0012749999999999773 0.005316500000000019] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' -0.0035503333333333177 -0.004773000000000025] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' -0.002022833333333324 0.004329499999999977] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' -0.0004181666666666691 0.00551733333333334] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' -0.003792132022003671 0.0011548591431905313] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' -0.0006031666666666713 0.0014893333333333392] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.0014784999999999967 -0.0025994999999999968] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' 0.0012684999999999984 -0.0031763333333333322] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' -0.00048649999999999713 0.0016416666666666515] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' -0.0005819999999999937 -0.0035733333333333424] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' -0.00014935822637106435 -0.001061010168361376] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' -0.0003616666666666693 -4.549999999998515e-05] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.000966999999999994 -0.0011644999999999755] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' -0.0010223333333333358 0.0030839999999999843] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' 0.0002923333333333365 -0.004340166666666694] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' 0.0030323333333333266 0.0065388333333333245] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' 0.0006334389064844065 0.0015924320720120204] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' 0.00012199999999999515 -0.001358166666666648] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.0003361666666666376 0.0024641666666666805] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' 0.00021466666666666848 0.0008258333333333212] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' 0.00015933333333331727 -0.0007008333333333591] ['X_avg', 
'Y_avg', 'Z_avg'] ['Silence 2' -0.001856833333333346 0.004370166666666667] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' 0.0010001666944490749 -0.0012347057842973846] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' 0.0011263333333333396 0.0006398333333333331] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.00011600000000002334 -0.001733833333333332] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' -0.0010626666666666627 -0.0009393333333333326] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' 0.001281500000000013 -0.000772333333333331] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' 0.00040050000000000334 0.0032326666666666645] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' 0.0001581930321720307 0.0010300050008334727] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' 0.00046900000000000354 0.0014403333333333327] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.0011194999999999975 0.003680999999999997] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' 0.002046333333333327 -0.00017233333333333196] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' -0.001300000000000002 0.0019230000000000017] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' -0.00022633333333333403 0.0026764999999999996] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' 0.00044274045674278195 -0.0003238873145524274] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' -0.0006469999999999913 -0.0013754999999999976] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' 0.0014590000000000033 -0.00024983333333333726] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' 0.002443499999999991 -0.0020625] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' -0.0016783333333333416 -0.005789166666666664] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' 0.0020175000000000032 0.005807666666666668] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' -0.0006422737122853919 3.500583430571895e-05] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' 0.001595333333333315 -0.001727999999999999] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.0025019999999999908 -0.004485333333333332] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' 0.0010815000000000054 0.002335666666666666] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' 
0.00028450000000001787 0.002107999999999999] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' -0.005054166666666674 -0.001764166666666668] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' 0.0009373228871478659 0.0028098016336056] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' -0.0001389999999999721 5.699999999999742e-05] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' 0.00011549999999999728 0.0016160000000000044] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' 0.0002513333333333018 5.233333333332742e-05] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' 0.001189833333333316 0.001271500000000003] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' 0.002189166666666665 0.005836166666666656] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' -0.001871145190865148 -0.0017021170195032527] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' 0.00047950000000000157 0.008857500000000016] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' -0.00016183333333333395 0.0007444999999999974] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' -0.0013991666666666636 -0.0006290000000000001] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' -0.0006339999999999956 0.0013368333333333264] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' -0.0019166666666666666 0.002335999999999994] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 1' -0.0011253542257042895 -0.0017541256876146066] ['X_avg', 'Y_avg', 'Z_avg'] ['Middle Silence' -0.0008629999999999995 -0.0010273333333333313] ['X_avg', 'Y_avg', 'Z_avg'] ['Meditation' 0.0008701666666666673 0.004382166666666668] ['X_avg', 'Y_avg', 'Z_avg'] ['Salsa' -0.00019133333333333743 -0.00038183333333332333] ['X_avg', 'Y_avg', 'Z_avg'] ['EDM' 2.7833333333338334e-05 -4.083333333333409e-05] ['X_avg', 'Y_avg', 'Z_avg'] ['Silence 2' -0.000679333333333337 -0.004056333333333347]
def moving_average(a, n=3):
    """Return the simple moving average of `a` with window length `n`.

    Uses a running cumulative sum, so the whole computation is O(len(a)).
    The result has len(a) - n + 1 points: element i is the mean of
    a[i : i + n].
    """
    running = np.cumsum(a, dtype=float)
    # Subtracting the sum shifted by n turns cumulative sums into window sums.
    running[n:] -= running[:-n]
    # The first n-1 entries are partial windows; drop them and normalise.
    return running[n - 1:] / n
ma_coef = 50  # moving-average window length in samples (0.5 s at fs = 100 Hz)
human = 2     # default person for the plots below
n = 0         # default stimulus-segment index
def plot_and_find_ma(stimuled_humans, human, n):
    """Plot moving-averaged displacement per axis plus a spectrogram per axis.

    Parameters:
        stimuled_humans: person -> {segment index -> DataFrame} mapping.
        human: person (marker) number.
        n: stimulus-segment index (keys of `stim_types`).

    Saves the moving-average figure to ./img/MA_human_<id>_<stimulus>.jpeg.
    Relies on module-level `ma_coef`, `axes`, `stim_types`, `moving_average`
    and `signal` (scipy).
    """
    # FIX: removed a dead initial `out_sig` computation (immediately
    # overwritten inside the loop) and the unused max_v/min_v bounds that
    # only fed a commented-out set_ylim call.
    fig, axs = plt.subplots(3, 1, figsize=([30, 15]))
    for i in range(len(axes)):
        out_sig = moving_average(np.array(stimuled_humans[human][n][f'd{axes[i]}']), ma_coef)
        axs[i].set_title(f'Human: {human}, Stimul: {stim_types[n]}, Axis: {axes[i]}')
        # The moving average is only defined from sample ma_coef-1 onward,
        # so the time axis is trimmed to match.
        axs[i].plot(stimuled_humans[human][n]['Time (s)'][ma_coef-1:], out_sig, color='r', label='Filtered signal')
        axs[i].set_xlabel('Time (s)')
        axs[i].set_ylabel('Amplitude (V)')
        axs[i].set_ylim([-0.3, 0.3])
        axs[i].grid(True)
        # Spectrogram of the *raw* displacement (not the moving average),
        # 200-sample (2 s) exponential window at fs = 100 Hz.
        f, t, Sxx = signal.spectrogram(np.array(stimuled_humans[human][n][f'd{axes[i]}']), 100, window=('exponential', 0.5), nperseg=200)
        fg, ax = plt.subplots(1, 1, figsize=([30, 5]))
        ax.set_title(f'Spectogram signal - 0.1s Window')
        ax.pcolormesh(t, f, Sxx, shading='gouraud')
        ax.set_xlabel('Time [sec]')
        ax.set_ylabel('Frequency [Hz]')
        ax.grid(True)
    fig.savefig(f'./img/MA_human_{human}_{stim_types[n]}.jpeg')
# out_sig
# Render the moving-average plots and spectrograms for every stimulus
# segment of person 3.
human = 3
for i in range(parts_amount):
    plot_and_find_ma(stimuled_humans, human, i)
<ipython-input-240-1d3184eb9768>:7: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`). fig, axs = plt.subplots(3, 1, figsize=([30, 15]))
# Self-reported demographics / weekly-activity questionnaire answers.
exp_df = pd.read_csv('./data/demographics/self_report_nm15.csv')
exp_df.head(36)
| Session | Participant | Age | Sex | Listen to music | Create music | Dance | Exercise | Tiresome | Motion compared to standstill | Motion compared to music | Eyes open | Locked knees | Comments | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 1 | 17 | M | 35.0 | 5.0 | 4.0 | 0.0 | 1 | 4 | 1 | Y | N | NaN |
| 1 | 1 | 2 | 16 | M | 32.0 | 4.0 | 3.0 | 0.0 | 2 | 4 | 4 | B | B | NaN |
| 2 | 1 | 3 | 17 | M | 24.0 | 8.0 | 4.0 | 2.0 | 3 | 3 | 2 | Y | B | NaN |
| 3 | 1 | 4 | 19 | F | 7.0 | 7.0 | 0.5 | 2.0 | 2 | 4 | 4 | B | Y | NaN |
| 4 | 1 | 5 | 19 | M | 6.0 | 5.0 | 2.0 | 8.0 | 4 | 3 | 4 | Y | N | NaN |
| 5 | 1 | 6 | 18 | M | 14.0 | 0.0 | 0.0 | 0.0 | 2 | 4 | 3 | Y | B | NaN |
| 6 | 1 | 7 | 37 | F | 1.0 | 0.0 | 0.0 | 4.0 | 2 | 4 | 3 | Y | Y | NaN |
| 7 | 1 | 8 | 41 | F | 8.0 | 0.0 | 0.0 | 12.0 | 1 | 2 | 3 | N | N | NaN |
| 8 | 1 | 9 | 21 | F | 12.0 | 0.0 | 4.0 | 3.0 | 1 | 2 | 4 | B | N | NaN |
| 9 | 1 | 10 | 18 | M | 60.0 | 3.0 | 2.0 | 7.0 | 3 | 4 | 3 | Y | N | NaN |
| 10 | 1 | x | 19 | F | 20.0 | 0.0 | 5.0 | 0.0 | 2 | 3 | 3 | Y | B | (didn't write the participant number) |
| 11 | 1 | x | 19 | F | 8.0 | 0.0 | 1.0 | 5.5 | 3 | 4 | 3 | Y | B | (didn't write the participant number) |
| 12 | 2 | 2 | 32 | M | 15.0 | 2.0 | 0.0 | 1.0 | 1 | 3 | 1 | Y | Y | NaN |
| 13 | 2 | 5 | 18 | M | 48.0 | 0.0 | 6.0 | 6.0 | 2 | 3 | 3 | Y | Y | NaN |
| 14 | 2 | 6 | 37 | F | 1.0 | 0.0 | 0.0 | 4.0 | 2 | 4 | 3 | Y | Y | NaN |
| 15 | 2 | 8 | 43 | F | 4.0 | 0.0 | 1.0 | 0.5 | 3 | 2 | 2 | Y | N | NaN |
| 16 | 2 | 9 | 25 | M | 3.0 | 0.0 | 0.0 | 5.0 | 4 | 4 | 2 | Y | B | NaN |
| 17 | 2 | 10 | 18 | M | 10.0 | 20.0 | 5.0 | 5.0 | 3 | 4 | 2 | Y | B | NaN |
| 18 | 2 | 11 | 19 | F | 16.0 | 10.0 | 2.0 | 5.0 | 4 | 3 | 3 | Y | Y | NaN |
| 19 | 2 | 12 | 20 | M | 14.0 | 14.0 | 2.0 | 3.0 | 2 | 4 | 2 | Y | B | NaN |
| 20 | 3 | 1 | 62 | M | 12.0 | 6.0 | 0.0 | 6.0 | 1 | 3 | 4 | N | N | NaN |
| 21 | 3 | 2 | 18 | M | 9.0 | 0.0 | 2.0 | 0.0 | 3 | 4 | 4 | Y | N | NaN |
| 22 | 3 | 3 | 18 | M | 4.0 | 5.0 | 0.0 | 3.0 | 3 | 4 | 4 | Y | Y | NaN |
| 23 | 3 | 4 | 18 | M | 5.0 | 5.0 | 0.0 | 5.0 | 3 | 3 | 3 | N | Y | NaN |
| 24 | 3 | 5 | 19 | M | 10.0 | 16.0 | 0.0 | 5.0 | 1 | 3 | 1 | Y | B | NaN |
| 25 | 3 | 6 | 20 | F | 40.0 | 7.0 | 3.0 | 2.0 | 5 | 5 | 3 | Y | B | NaN |
| 26 | 3 | 9 | 37 | M | 8.0 | 20.0 | 0.5 | 1.0 | 3 | 3 | 4 | Y | Y | NaN |
| 27 | 3 | 10 | 18 | M | 7.0 | 0.5 | 0.0 | 9.0 | 2 | 2 | 1 | Y | B | NaN |
| 28 | 3 | 11 | 22 | M | 20.0 | 14.0 | 0.0 | 4.5 | 3 | 3 | 1 | Y | B | NaN |
| 29 | 3 | 12 | 18 | M | 25.0 | 20.0 | 1.0 | 5.0 | 2 | 3 | 4 | Y | Y | NaN |
| 30 | 4 | 1 | 18 | F | 35.0 | 5.0 | 5.0 | 6.0 | 5 | 5 | 3 | B | B | NaN |
| 31 | 4 | 2 | 24 | F | 2.0 | 6.0 | 2.0 | 6.0 | 1 | 3 | 3 | N | N | NaN |
| 32 | 4 | 3 | 24 | F | 50.0 | 0.0 | 10.0 | 20.0 | 2 | 3 | 4 | Y | Y | NaN |
| 33 | 4 | 4 | 23 | F | 10.0 | 15.0 | 0.5 | 0.5 | 2 | 2 | 3 | Y | B | NaN |
| 34 | 4 | 5 | 18 | F | 35.0 | 28.0 | 4.0 | 10.0 | 2 | 4 | 4 | B | B | NaN |
| 35 | 4 | 6 | 29 | F | 20.0 | 0.0 | 1.0 | 4.0 | 2 | 3 | 4 | Y | N | NaN |
# Correlation between two people's dX signals in the same / different segments.
young = [1,2,3,4,5,6,9,10]
old = [7,8]
h1 = 7  # first person to compare
h2 = 5  # second person to compare
n = 2   # stimulus index (2 = Meditation per stim_types)
h1_part = stimuled_humans[h1][n]
h2_part = stimuled_humans[h2][n]
h1_another_part = stimuled_humans[h1][n+1]
h2_another_part = stimuled_humans[h2][n+1]
# np.correlate with the default mode='valid' on equal-length inputs returns
# a single value — the dot product of the two series.
a = np.correlate(h1_part['dX'], h2_part['dX'])
b = np.correlate(h1_part['dX'], h1_another_part['dX'])
c = np.correlate(h1_part['dX'], h2_another_part['dX'])
print(stim_types[n], a,b,c)
print(np.mean(abs(h1_part['dX'])), np.mean(abs(h2_part['dX'])), '???',np.mean(abs(h1_another_part['dX'])),np.mean(abs(h2_another_part['dX'])) )
Meditation [0.041597] [0.513878] [0.139809] 0.02669766666666694 0.025624666666666258 ??? 0.030270833333333618 0.025577333333333514
# Same correlations as above, but the second print reports plain averages
# of dX instead of mean absolute displacement.
a = np.correlate(h1_part['dX'], h2_part['dX'])
b = np.correlate(h1_part['dX'], h1_another_part['dX'])
c = np.correlate(h1_part['dX'], h2_another_part['dX'])
print(stim_types[n], a,b,c)
print(np.average(h1_part['dX']), np.average(h2_part['dX']), '???',np.average(h1_another_part['dX']),np.average(h2_another_part['dX']) )
musicians = [1,2,3,6,10]
not_musicians = [4,5,7,8,9]
file = '01'
young = [1,2,3,4,5,6,9,10]
old = [7,8]
# group_1 = ['Under 25', young]
# group_2 = ['After 26', old]
group_1 = ['Musician', musicians]
group_2 = ['Not musician', not_musicians]
# Find statistics for class of people.
# FIX: DataFrame.append inside a loop is O(n^2) and was removed in pandas 2.0;
# collect row dicts and build the DataFrame once at the end. The duplicated
# append call in both branches is gone too. (Column order now follows dict
# insertion rather than append's alphabetical sort; access is by name.)
rows = []
for m in range(1, markers[file]):
    for n in range(parts_amount):
        part = stimuled_humans[m][n]
        part_dict = {}
        for ax in range(len(axes)):
            abs_displ = abs(part[f'd{axes[ax]}'])
            part_dict[f'{axes[ax]}_mean'] = np.mean(abs_displ)
            part_dict[f'{axes[ax]}_std'] = np.std(abs_displ)
            # Coefficient of variation of the absolute displacement.
            part_dict[f'{axes[ax]}_var'] = np.std(abs_displ) / np.mean(abs_displ)
        part_dict['part'] = stim_types[n]
        part_dict['human'] = m
        # People in neither group (e.g. 11) are deliberately dropped.
        if m in group_1[1]:
            part_dict['class'] = group_1[0]
            rows.append(part_dict)
        elif m in group_2[1]:
            part_dict['class'] = group_2[0]
            rows.append(part_dict)
statistics = pd.DataFrame(rows)
statistics.head()
| X_mean | X_std | X_var | Y_mean | Y_std | Y_var | Z_mean | Z_std | Z_var | class | human | part | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.037540 | 0.033309 | 0.887285 | 0.050823 | 0.042391 | 0.834080 | 0.019133 | 0.035427 | 1.851580 | Musician | 1.0 | Silence 1 |
| 1 | 0.043139 | 0.035831 | 0.830594 | 0.066146 | 0.054212 | 0.819582 | 0.022525 | 0.050146 | 2.226249 | Musician | 1.0 | Middle Silence |
| 2 | 0.044685 | 0.041674 | 0.932603 | 0.057384 | 0.049396 | 0.860792 | 0.020738 | 0.045178 | 2.178514 | Musician | 1.0 | Meditation |
| 3 | 0.036864 | 0.031984 | 0.867604 | 0.055613 | 0.047876 | 0.860892 | 0.018599 | 0.038294 | 2.058977 | Musician | 1.0 | Salsa |
| 4 | 0.040921 | 0.037253 | 0.910350 | 0.054142 | 0.045282 | 0.836363 | 0.018484 | 0.031184 | 1.687072 | Musician | 1.0 | EDM |
# Compare the two groups' movement statistics for every stimulus type.
for type_idx in range(6):
    # FIX: combine the two conditions with & in a single boolean mask.
    # The previous chained indexing (df[cond1][cond2]) reindexes the second
    # mask and raised "Boolean Series key will be reindexed" warnings.
    part_mask = statistics['part'] == stim_types[type_idx]
    mus_sil = statistics[part_mask & (statistics['class'] == group_1[0])]
    not_mus_sil = statistics[part_mask & (statistics['class'] == group_2[0])]
    group_names = [group_1[0], group_2[0]]
    groups = [mus_sil, not_mus_sil]
    print(stim_types[type_idx], '++++++++++++++++++++++++++++++++++++++++++++++++++++')
    for group in range(len(groups)):
        print(f'========== {group_names[group]} ===========')
        for ax in axes:
            print(f'{ax}_mean: ', np.mean(groups[group][f'{ax}_mean']))
            print(f'{ax}_std: ', np.mean(groups[group][f'{ax}_std']))
            print(f'{ax}_var: ', np.mean(groups[group][f'{ax}_var']))
            print(' --------------------')
Silence 1 ++++++++++++++++++++++++++++++++++++++++++++++++++++ ========== Musician =========== X_mean: 0.03977856309384901 X_std: 0.03301791326340886 X_var: 0.8338375236292404 -------------------- Y_mean: 0.06059876646107676 Y_std: 0.04729324314746655 Y_var: 0.7838930522544764 -------------------- Z_mean: 0.015083513918986666 Z_std: 0.016546166503783218 Z_var: 1.042777816081422 -------------------- ========== Not musician =========== X_mean: 0.030738923153858993 X_std: 0.02807561029635951 X_var: 0.9007055864061421 -------------------- Y_mean: 0.04884470745124178 Y_std: 0.038486377317472384 Y_var: 0.7884655469482393 -------------------- Z_mean: 0.014780463410567046 Z_std: 0.016834150342371484 Z_var: 1.0978826502544592 -------------------- Middle Silence ++++++++++++++++++++++++++++++++++++++++++++++++++++ ========== Musician =========== X_mean: 0.040589133333333326 X_std: 0.032435191312290265 X_var: 0.7996648008045556 -------------------- Y_mean: 0.059848399999999954 Y_std: 0.048634974742599514 Y_var: 0.8133529010946756 -------------------- Z_mean: 0.016105266666667346 Z_std: 0.020990825243717544 Z_var: 1.1991877914330629 -------------------- ========== Not musician =========== X_mean: 0.031049666666666864 X_std: 0.02825359116113165 X_var: 0.8997028215040362 -------------------- Y_mean: 0.049260766666666816 Y_std: 0.038405677505870525 Y_var: 0.7807521711836806 -------------------- Z_mean: 0.014317366666666437 Z_std: 0.015730316969779105 Z_var: 1.088968143857957 -------------------- Meditation ++++++++++++++++++++++++++++++++++++++++++++++++++++ ========== Musician =========== X_mean: 0.04131576666666671 X_std: 0.033534868251273324 X_var: 0.810404506823523 -------------------- Y_mean: 0.05781180000000026 Y_std: 0.04823352661356287 Y_var: 0.8336078977395097 -------------------- Z_mean: 0.014618300000000903 Z_std: 0.01827103828047879 Z_var: 1.1384603228766372 -------------------- ========== Not musician =========== X_mean: 0.03224146666666637 X_std: 
0.033632359763706744 X_var: 0.9949737785627171 -------------------- Y_mean: 0.050705500000000105 Y_std: 0.04261161450386851 Y_var: 0.8419073829006767 -------------------- Z_mean: 0.01457253333333347 Z_std: 0.0154463254705341 Z_var: 1.020710842642633 -------------------- Salsa ++++++++++++++++++++++++++++++++++++++++++++++++++++ ========== Musician =========== X_mean: 0.0403845333333332 X_std: 0.033264111277103506 X_var: 0.8280126443510774 -------------------- Y_mean: 0.057631399999999874 Y_std: 0.04659053175668944 Y_var: 0.8060892743598057 -------------------- Z_mean: 0.014335933333333273 Z_std: 0.016835331247947193 Z_var: 1.1040847845095914 -------------------- ========== Not musician =========== X_mean: 0.0318899999999998 X_std: 0.030208502867868074 X_var: 0.9314851922466773 -------------------- Y_mean: 0.049772866666666436 Y_std: 0.03881094413388584 Y_var: 0.7830140444777198 -------------------- Z_mean: 0.014105233333332901 Z_std: 0.014381571885558489 Z_var: 0.9825526854088171 -------------------- EDM ++++++++++++++++++++++++++++++++++++++++++++++++++++ ========== Musician =========== X_mean: 0.04161233333333312 X_std: 0.0339072369991116 X_var: 0.816037329406574 -------------------- Y_mean: 0.05516343333333357 Y_std: 0.0446881874539185 Y_var: 0.8116047924078229 -------------------- Z_mean: 0.016088699999999578 Z_std: 0.017258624173533105 Z_var: 1.0523433106901705 -------------------- ========== Not musician =========== X_mean: 0.03228063333333281 X_std: 0.02812539101778455 X_var: 0.8646351754801126 -------------------- Y_mean: 0.051045166666666676 Y_std: 0.041275076681693314 Y_var: 0.8027373766272332 -------------------- Z_mean: 0.01611406666666634 Z_std: 0.017418892047189428 Z_var: 1.0674092535513995 -------------------- Silence 2 ++++++++++++++++++++++++++++++++++++++++++++++++++++ ========== Musician =========== X_mean: 0.04668603333333314 X_std: 0.040081644230666014 X_var: 0.8607833454917259 -------------------- Y_mean: 0.06489206666666672 Y_std: 
0.05501963151311475 Y_var: 0.8535020349163085 -------------------- Z_mean: 0.01522736666666609 Z_std: 0.02273809699843083 Z_var: 1.4131018869621632 -------------------- ========== Not musician =========== X_mean: 0.03331943333333398 X_std: 0.030687025587910442 X_var: 0.9068540845976081 -------------------- Y_mean: 0.050838700000000125 Y_std: 0.03979848380069851 Y_var: 0.781291659827718 -------------------- Z_mean: 0.014815433333333444 Z_std: 0.014803674125751088 Z_var: 0.9679895617933563 --------------------
<ipython-input-373-ca04683d05c0>:7: UserWarning: Boolean Series key will be reindexed to match DataFrame index. mus_sil = statistics[statistics['part'] == stim_types[type_idx]][statistics['class'] == group_1[0]] <ipython-input-373-ca04683d05c0>:8: UserWarning: Boolean Series key will be reindexed to match DataFrame index. not_mus_sil = statistics[statistics['part'] == stim_types[type_idx]][statistics['class'] == group_2[0]]
# NOTE(review): mus_sil / not_mus_sil here are whatever the preceding loop
# left behind (its last iteration, 'Silence 2'), so this cell re-prints
# that final group comparison only.
group_names = [group_1[0], group_2[0]]
# groups = [mus_edm, not_mus_edm]
groups = [mus_sil, not_mus_sil]
print('Silence 2')
for group in range(len(groups)):
    print(f'========== {group_names[group]} ===========')
    for ax in axes:
        print(f'{ax}_mean: ', np.mean(groups[group][f'{ax}_mean']))
        print(f'{ax}_std: ', np.mean(groups[group][f'{ax}_std']))
        print(f'{ax}_var: ', np.mean(groups[group][f'{ax}_var']))
        print(' -------------------- ')
Silence 2 ========== Musician =========== X_mean: 0.04668603333333314 X_std: 0.040081644230666014 X_var: 0.8607833454917259 -------------------- Y_mean: 0.06489206666666672 Y_std: 0.05501963151311475 Y_var: 0.8535020349163085 -------------------- Z_mean: 0.01522736666666609 Z_std: 0.02273809699843083 Z_var: 1.4131018869621632 -------------------- ========== Not musician =========== X_mean: 0.03331943333333398 X_std: 0.030687025587910442 X_var: 0.9068540845976081 -------------------- Y_mean: 0.050838700000000125 Y_std: 0.03979848380069851 Y_var: 0.781291659827718 -------------------- Z_mean: 0.014815433333333444 Z_std: 0.014803674125751088 Z_var: 0.9679895617933563 --------------------
import librosa
import librosa.display
# Load the salsa stimulus track and estimate its tempo and beat positions.
# NOTE: `sr` here is librosa's audio sampling rate, unrelated to the
# 100 Hz mocap rate used above.
y, sr = librosa.load('./data/sound_data/still_stand_salsa.wav')
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
D = librosa.stft(y) # STFT of y
S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
print('Tempo:', tempo, 'BeatFrames', beat_frames)
Tempo: 95.703125 BeatFrames [ 12 39 67 98 125 147 174 201 228 255 281 308 335 363 389 417 443 470 497 529 556 583 615 642 664 691 718 745 771 798 825 852 874 901 928 955 982 1008 1035 1062 1089 1116 1143 1170 1196 1219 1245 1272 1299 1326 1353 1380 1407 1434 1461 1487 1514 1541 1569 1595 1622 1649 1676 1703 1735 1762 1789 1821 1848 1870 1897 1924 1950 1977 2004 2031 2058 2085 2112 2139 2166 2193 2220 2247 2274 2300 2327 2354 2381 2408 2435 2462 2488 2511 2537]
# Chromagram (pitch-class energy over time) of the loaded audio track.
fig, ax = plt.subplots(figsize=([30, 5]))
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
# img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time',
key='Eb:maj', ax=ax)
ax.set(title='Now with labeled axes!')
fig.colorbar(img, ax=ax, format="%+2.f dB")
<matplotlib.colorbar.Colorbar at 0x7f77d7e2d9d0>
# fig, ax = plt.subplots(figsize=([30, 5]))
# chroma = librosa.feature.chroma_cqt(y=np.array(stimuled_humans[2][4]['dX']), sr=100)
# # img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)
# img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time',
# key='Eb:maj', ax=ax)
# ax.set(title='Now with labeled axes!')
# fig.colorbar(img, ax=ax, format="%+2.f dB")
type_idx = 3  # stimulus to compare (3 = Salsa per stim_types)
# FIX: three copy-pasted, byte-identical spectrogram cells (persons 1, 2, 6)
# collapsed into one loop; plots are produced in the same order.
for person in (1, 2, 6):
    f, t, Sxx = signal.spectrogram(np.array(stimuled_humans[person][type_idx]['dX']), 100, window=('exponential', 0.2), nperseg=200)
    fig, axs = plt.subplots(1, 1, figsize=([23, 3]))
    axs.pcolormesh(t, f, Sxx, shading='gouraud')
    axs.set_xlabel('Time [sec]')
    axs.set_ylabel('Frequency [Hz]')
    axs.grid(True)
fig, ax = plt.subplots(figsize=([30, 5]))
chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
# img = librosa.display.specshow(S_db, x_axis='time', y_axis='linear', ax=ax)
img = librosa.display.specshow(chroma, y_axis='chroma', x_axis='time',
key='Eb:maj', ax=ax)
ax.set(title=stim_types[type_idx])
fig.colorbar(img, ax=ax, format="%+2.f dB")
print(stim_types[type_idx])
Salsa
stimuled_humans[2][\4 4]['dX']
24000 0.090
24001 0.084
24002 0.118
24003 0.076
24004 0.107
...
29995 -0.063
29996 -0.070
29997 -0.093
29998 -0.079
29999 -0.094
Name: dX, Length: 6000, dtype: float64
# n_mfcc = 40
n_fft = 2048       # NOTE(review): unused below — librosa's defaults apply
hop_length = 512   # NOTE(review): unused below
num_seggments = 5  # NOTE(review): unused below (and "segments" is misspelled)
# tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
# beat_times = librosa.frames_to_time(beat_frames, sr=sr)
# D = librosa.stft(y) # STFT of y
# S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
# print('Tempo:', tempo, 'BeatFrames', beat_frames)
# MFCCs of the loaded audio track, then transposed so rows are time frames.
mfcc = librosa.feature.mfcc(y=y,
sr=sr)
# mfcc.shape
mfcc = mfcc.T
# mel_array.append(mfcc)
fig, ax = plt.subplots(figsize=([30, 5]))
img = librosa.display.specshow(mfcc, x_axis='time', ax=ax)
fig.colorbar(img, ax=ax)
ax.set(title='MFCC')
[Text(0.5, 1.0, 'MFCC')]